# This is a BitKeeper generated patch for the following project: # Project Name: Linux kernel tree # This patch format is intended for GNU patch command version 2.5 or higher. # This patch includes the following deltas: # ChangeSet 1.757.12.2+1.717.12.4 -> 1.757.12.3 # mm/mmap.c 1.25.1.1 -> 1.28.1.1 # arch/ia64/hp/zx1/hpzx1_misc.c 1.1.1.2 -> 1.4 # Makefile 1.190.1.4 -> 1.197 # drivers/char/agp/agpgart_be.c 1.35.1.2 -> 1.43 # drivers/char/Config.in 1.36.1.2 -> 1.37.1.1 # arch/i386/kernel/mpparse.c 1.9.2.2 -> 1.9.3.1 # arch/i386/kernel/io_apic.c 1.17.2.2 -> 1.17.3.1 # arch/i386/kernel/pci-pc.c 1.24.2.1 -> 1.24.3.1 # include/linux/pci_ids.h 1.44.1.2 -> 1.48 # mm/memory.c 1.51.1.1 -> 1.53.1.1 # drivers/net/eepro100.c 1.36.2.10 -> 1.39 # Documentation/Configure.help 1.128.12.3 -> 1.128.1.13 # arch/i386/kernel/pci-irq.c 1.16.2.1 -> 1.16.3.1 # diff -Nru a/Documentation/Configure.help b/Documentation/Configure.help --- a/Documentation/Configure.help Wed Oct 8 09:08:05 2003 +++ b/Documentation/Configure.help Wed Oct 8 09:08:05 2003 @@ -18360,70 +18360,64 @@ will issue the hlt instruction if nothing is to be done, thereby sending the processor to sleep and saving power. -ACPI support -CONFIG_ACPI - ACPI/OSPM support for Linux is currently under development. As such, - this support is preliminary and EXPERIMENTAL. Configuring ACPI - support enables kernel interfaces that allow higher level software - (OSPM) to manipulate ACPI defined hardware and software interfaces, - including the evaluation of ACPI control methods. If unsure, choose - N here. Note, this option will enlarge your kernel by about 120K. - - This support requires an ACPI compliant platform (hardware/firmware). - If both ACPI and Advanced Power Management (APM) support are - configured, whichever is loaded first shall be used. - - This code DOES NOT currently provide a complete OSPM implementation - -- it has not yet reached APM's level of functionality. When fully - implemented, Linux ACPI/OSPM will provide a more robust functional - replacement for legacy configuration and power management - interfaces, including the Plug-and-Play BIOS specification (PnP - BIOS), the Multi-Processor Specification (MPS), and the Advanced - Power Management specification (APM). - - Linux support for ACPI/OSPM is based on Intel Corporation's ACPI - Component Architecture (ACPI CA). The latest ACPI CA source code, - documentation, debug builds, and implementation status information - can be downloaded from: - . - - The ACPI Sourceforge project may also be of interest: - - -Enable ACPI 2.0 with errata 1.3 -CONFIG_ACPI20 - Enable support for the 2.0 version of the ACPI interpreter. See the - help for ACPI for caveats and discussion. - -ACPI kernel configuration manager -CONFIG_ACPI_KERNEL_CONFIG - If you say `Y' here, Linux's ACPI support will use the - hardware-level system descriptions found on IA64 machines. - -ACPI Debug Statements -CONFIG_ACPI_DEBUG - The ACPI driver can optionally report errors with a great deal - of verbosity. Saying Y enables these statements. This will increase - your kernel size by around 50K. +ACPI Support +CONFIG_ACPI_ENABLE + Advanced Configuration and Power Interface (ACPI) support for + Linux requires an ACPI compliant platform (hardware/firmware), + and assumes the presence of OS-directed configuration and power + management (OSPM) software. This option will enlarge your + kernel by about 70K. 
+
+  Linux ACPI provides a robust functional replacement for several
+  legacy configuration and power management interfaces, including
+  the Plug-and-Play BIOS specification (PnP BIOS), the
+  MultiProcessor Specification (MPS), and the Advanced Power
+  Management (APM) specification.  If both ACPI and APM support
+  are configured, whichever is loaded first shall be used.
+
+  Add "acpi=off" to the kernel command line to disable this feature.
+  (Try "man bootparam" or see the documentation of your boot loader
+  about how to pass options to the kernel at boot time.)
+
+  Add "acpi=ht-only" to the kernel command line to limit ACPI
+  support to processor enumeration only (see CONFIG_ACPI_HT_ONLY).
+
+  ----------
+
+  The ACPI SourceForge project contains the latest source code,
+  documentation, tools, mailing list subscription, and other
+  information.  This project is available at:
+
+
+  Linux support for ACPI is based on Intel Corporation's ACPI
+  Component Architecture (ACPI CA).  For more information see:
+
+
+  ACPI is an open industry specification co-developed by Compaq,
+  Intel, Microsoft, Phoenix, and Toshiba.  The specification is
+  available at:
+
+
+CONFIG_ACPI_HT_ONLY
+  This option enables limited ACPI support -- just enough to
+  enumerate processors from the ACPI Multiple APIC Description
+  Table (MADT).  Note that ACPI supports both logical (e.g. Hyper-
+  Threading) and physical processors, where the MultiProcessor
+  Specification (MPS) table only supports physical processors.
 
-ACPI Bus Manager
-CONFIG_ACPI_BUSMGR
-  The ACPI Bus Manager enumerates devices in the ACPI namespace, and
-  handles PnP messages.  All ACPI devices use its services, so using
-  them requires saying Y here.
+  Full ACPI support (CONFIG_ACPI) is preferred.  Use this option
+  only if you wish to limit ACPI's role to processor enumeration.
 
-ACPI System Driver
-CONFIG_ACPI_SYS
-  This driver will enable your system to shut down using ACPI, and
-  dump your ACPI DSDT table using /proc/acpi/dsdt.
+CONFIG_ACPI_AC
+  This driver adds support for the AC Adapter object, which indicates
+  whether a system is on AC, or not.  Typically, only mobile systems
+  have this object, since desktops are always on AC.
 
-ACPI Processor Driver
-CONFIG_ACPI_CPU
-  This driver installs ACPI as the idle handler for Linux, and uses
-  ACPI C2 and C3 processor states to save power, on systems that
-  support it.
+CONFIG_ACPI_BATTERY
+  This driver adds support for battery information through
+  /proc/acpi/battery.  If you have a mobile system with a battery,
+  say Y.
 
-ACPI Button
 CONFIG_ACPI_BUTTON
   This driver registers for events based on buttons, such as the
   power, sleep, and lid switch.  In the future, a daemon will read
@@ -18431,27 +18425,34 @@
   down the system.  Until then, you can cat it, and see output when
   a button is pressed.
 
-ACPI AC Adapter
-CONFIG_ACPI_AC
-  This driver adds support for the AC Adapter object, which indicates
-  whether a system is on AC, or not.  Typically, only laptops have
-  this object, since desktops are always on AC.
-
-ACPI Embedded Controller
 CONFIG_ACPI_EC
   This driver is required on some systems for the proper operation of
-  the battery and thermal drivers.  If you are compiling for a laptop,
-  say Y.
+  the battery and thermal drivers.  If you are compiling for a
+  mobile system, say Y.
 
-ACPI Control Method Battery
-CONFIG_ACPI_CMBATT
-  This driver adds support for battery information through
-  /proc/acpi/battery.  If you have a laptop with a battery, say Y.
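A minimal sketch of how such a boot option can be wired up, assuming 2.4's __setup() mechanism; the handler name and the acpi_disabled/acpi_ht_only flags are illustrative, standing in for whatever the ACPI core actually consults before scanning tables:

	/* Hypothetical sketch -- parse "acpi=off" / "acpi=ht-only" */
	static int __init acpi_boot_setup(char *str)
	{
		if (strcmp(str, "off") == 0)
			acpi_disabled = 1;	/* assumed flag: skip all ACPI */
		else if (strcmp(str, "ht-only") == 0)
			acpi_ht_only = 1;	/* assumed flag: MADT CPUs only */
		return 1;			/* option consumed */
	}
	__setup("acpi=", acpi_boot_setup);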
+CONFIG_ACPI_PROCESSOR + This driver installs ACPI as the idle handler for Linux, and uses + ACPI C2 and C3 processor states to save power, on systems that + support it. -ACPI Thermal CONFIG_ACPI_THERMAL - This driver handles overheating conditions on laptops. It is HIGHLY - recommended, as your laptop CPU may be damaged without it. + This driver adds support for ACPI thermal zones. Most mobile and + some desktop systems support ACPI thermal zones. It is HIGHLY + recommended that this option be enabled, as your processor(s) + may be damaged without it. + +CONFIG_ACPI_FAN + This driver adds support for ACPI fan devices, allowing user-mode + applications to perform basic fan control (on, off, status). + +CONFIG_ACPI_SYSTEM + This driver will enable your system to shut down using ACPI, and + dump your ACPI DSDT table using /proc/acpi/dsdt. + +CONFIG_ACPI_DEBUG + The ACPI driver can optionally report errors with a great deal + of verbosity. Saying Y enables these statements. This will increase + your kernel size by around 50K. Advanced Power Management BIOS support CONFIG_APM @@ -25498,12 +25499,31 @@ and restore instructions. It's useful for tracking down spinlock problems, but slow! If you're unsure, select N. -Early printk support (requires VGA!) +Early printk support CONFIG_IA64_EARLY_PRINTK - Selecting this option uses the VGA screen for printk() output before - the consoles are initialised. It is useful for debugging problems - early in the boot process, but only if you have a VGA screen - attached. If you're unsure, select N. + Selecting this option uses a UART or VGA screen (or both) for + printk() output before the consoles are initialised. It is useful + for debugging problems early in the boot process, but only if you + have a serial terminal or a VGA screen attached. If you're unsure, + select N. + +Early printk on serial port +CONFIG_IA64_EARLY_PRINTK_UART + Select this option to use a serial port for early printk() output. + You must also select either CONFIG_IA64_EARLY_PRINTK_UART_BASE or + CONFIG_SERIAL_HCDP. If you select CONFIG_SERIAL_HCDP, early + printk() output will appear on the first console device described by + the HCDP. If you set CONFIG_IA64_EARLY_PRINTK_UART_BASE, the HCDP + will be ignored. + +UART base address +CONFIG_IA64_EARLY_PRINTK_UART_BASE + The physical MMIO address of the UART to use for early printk(). + This overrides any UART located using the EFI HCDP table. + +Early printk on VGA +CONFIG_IA64_EARLY_PRINTK_VGA + Select this option to use VGA for early printk() output. Print possible IA64 hazards to console CONFIG_IA64_PRINT_HAZARDS diff -Nru a/Makefile b/Makefile --- a/Makefile Wed Oct 8 09:08:05 2003 +++ b/Makefile Wed Oct 8 09:08:05 2003 @@ -90,6 +90,7 @@ CFLAGS := $(CPPFLAGS) -Wall -Wstrict-prototypes -Wno-trigraphs -O2 \ -fno-strict-aliasing -fno-common +CFLAGS += -g ifndef CONFIG_FRAME_POINTER CFLAGS += -fomit-frame-pointer endif @@ -300,8 +301,7 @@ $(CONFIG_SHELL) scripts/Configure -d arch/$(ARCH)/config.in xconfig: symlinks - $(MAKE) -C scripts kconfig.tk - wish -f scripts/kconfig.tk + @echo -e "***\n* Sorry, xconfig is broken; use \"make menuconfig\" instead.\n***" menuconfig: include/linux/version.h symlinks $(MAKE) -C scripts/lxdialog all diff -Nru a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c --- a/arch/i386/kernel/io_apic.c Wed Oct 8 09:08:05 2003 +++ b/arch/i386/kernel/io_apic.c Wed Oct 8 09:08:05 2003 @@ -17,6 +17,7 @@ * thanks to Eric Gilmore * and Rolf G. 
Tews
 *					for testing these extensively
+ *	Paul Diefenbaugh	:	Added full ACPI support
 */
 
 #include
@@ -28,6 +29,7 @@
 #include
 #include
 #include
+#include
 
 #include
 #include
@@ -1052,6 +1054,10 @@
 	unsigned char old_id;
 	unsigned long flags;
 
+	if (acpi_ioapic)
+		/* This gets done during IOAPIC enumeration for ACPI. */
+		return;
+
 	if (clustered_apic_mode)
 		/* We don't have a good way to do this yet - hack */
 		phys_id_present_map = (u_long) 0xf;
@@ -1650,8 +1656,7 @@
 	printk("ENABLING IO-APIC IRQs\n");
 
 	/*
-	 * Set up the IO-APIC IRQ routing table by parsing the MP-BIOS
-	 * mptable:
+	 * Set up IO-APIC IRQ routing.
 	 */
 	setup_ioapic_ids_from_mpc();
 	sync_Arb_IDs();
@@ -1660,3 +1665,159 @@
 	check_timer();
 	print_IO_APIC();
 }
+
+
+/* --------------------------------------------------------------------------
+                          ACPI-based IOAPIC Configuration
+   -------------------------------------------------------------------------- */
+
+#ifdef CONFIG_ACPI_BOOT
+
+#define IO_APIC_MAX_ID		15
+
+int __init io_apic_get_unique_id (int ioapic, int apic_id)
+{
+	struct IO_APIC_reg_00 reg_00;
+	static unsigned long apic_id_map = 0;
+	unsigned long flags;
+	int i = 0;
+
+	/*
+	 * The P4 platform supports up to 256 APIC IDs on two separate APIC
+	 * buses (one for LAPICs, one for IOAPICs), where predecessors only
+	 * support up to 16 on one shared APIC bus.
+	 *
+	 * TBD: Expand LAPIC/IOAPIC support on P4-class systems to take full
+	 *      advantage of new APIC bus architecture.
+	 */
+
+	if (!apic_id_map)
+		apic_id_map = phys_cpu_present_map;
+
+	spin_lock_irqsave(&ioapic_lock, flags);
+	*(int *)&reg_00 = io_apic_read(ioapic, 0);
+	spin_unlock_irqrestore(&ioapic_lock, flags);
+
+	if (apic_id >= IO_APIC_MAX_ID) {
+		printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying "
+			"%d\n", ioapic, apic_id, reg_00.ID);
+		apic_id = reg_00.ID;
+	}
+
+	/*
+	 * Every APIC in a system must have a unique ID or we get lots of nice
+	 * 'stuck on smp_invalidate_needed IPI wait' messages.
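+	 *
+	 * For illustration (hypothetical values): if the CPUs own IDs 0-3
+	 * (phys_cpu_present_map == 0xf seeds apic_id_map above), a BIOS-
+	 * assigned IOAPIC ID of 2 collides, and the scan below settles on
+	 * ID 4 -- the first bit still clear in apic_id_map.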
+	 */
+	if (apic_id_map & (1 << apic_id)) {
+
+		for (i = 0; i < IO_APIC_MAX_ID; i++) {
+			if (!(apic_id_map & (1 << i)))
+				break;
+		}
+
+		if (i == IO_APIC_MAX_ID)
+			panic("Max apic_id exceeded!\n");
+
+		printk(KERN_WARNING "IOAPIC[%d]: apic_id %d already used, "
+			"trying %d\n", ioapic, apic_id, i);
+
+		apic_id = i;
+	}
+
+	apic_id_map |= (1 << apic_id);
+
+	if (reg_00.ID != apic_id) {
+		reg_00.ID = apic_id;
+
+		spin_lock_irqsave(&ioapic_lock, flags);
+		io_apic_write(ioapic, 0, *(int *)&reg_00);
+		*(int *)&reg_00 = io_apic_read(ioapic, 0);
+		spin_unlock_irqrestore(&ioapic_lock, flags);
+
+		/* Sanity check */
+		if (reg_00.ID != apic_id)
+			panic("IOAPIC[%d]: Unable to change apic_id!\n", ioapic);
+	}
+
+	printk(KERN_INFO "IOAPIC[%d]: Assigned apic_id %d\n", ioapic, apic_id);
+
+	return apic_id;
+}
+
+
+int __init io_apic_get_version (int ioapic)
+{
+	struct IO_APIC_reg_01 reg_01;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ioapic_lock, flags);
+	*(int *)&reg_01 = io_apic_read(ioapic, 1);
+	spin_unlock_irqrestore(&ioapic_lock, flags);
+
+	return reg_01.version;
+}
+
+
+int __init io_apic_get_redir_entries (int ioapic)
+{
+	struct IO_APIC_reg_01 reg_01;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ioapic_lock, flags);
+	*(int *)&reg_01 = io_apic_read(ioapic, 1);
+	spin_unlock_irqrestore(&ioapic_lock, flags);
+
+	return reg_01.entries;
+}
+
+
+int io_apic_set_pci_routing (int ioapic, int pin, int irq)
+{
+	struct IO_APIC_route_entry entry;
+	unsigned long flags;
+
+	if (!IO_APIC_IRQ(irq)) {
+		printk(KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n",
+			ioapic);
+		return -EINVAL;
+	}
+
+	/*
+	 * Generate a PCI IRQ routing entry and program the IOAPIC accordingly.
+	 * Note that we mask (disable) IRQs now -- these get enabled when the
+	 * corresponding device driver registers for this IRQ.
+	 */
+
+	memset(&entry,0,sizeof(entry));
+
+	entry.delivery_mode = dest_LowestPrio;
+	entry.dest_mode = INT_DELIVERY_MODE;
+	entry.dest.logical.logical_dest = TARGET_CPUS;
+	entry.mask = 1;					/* Disabled (masked) */
+	entry.trigger = 1;				/* Level sensitive */
+	entry.polarity = 1;				/* Low active */
+
+	add_pin_to_irq(irq, ioapic, pin);
+
+	entry.vector = assign_irq_vector(irq);
+
+	printk(KERN_DEBUG "IOAPIC[%d]: Set PCI routing entry (%d-%d -> 0x%x -> "
+		"IRQ %d)\n", ioapic,
+		mp_ioapics[ioapic].mpc_apicid, pin, entry.vector, irq);
+
+	irq_desc[irq].handler = &ioapic_level_irq_type;
+
+	set_intr_gate(entry.vector, interrupt[irq]);
+
+	if (!ioapic && (irq < 16))
+		disable_8259A_irq(irq);
+
+	spin_lock_irqsave(&ioapic_lock, flags);
+	io_apic_write(ioapic, 0x11+2*pin, *(((int *)&entry)+1));
+	io_apic_write(ioapic, 0x10+2*pin, *(((int *)&entry)+0));
+	spin_unlock_irqrestore(&ioapic_lock, flags);
+
+	return entry.vector;
+}
+
+#endif /*CONFIG_ACPI_BOOT*/
diff -Nru a/arch/i386/kernel/mpparse.c b/arch/i386/kernel/mpparse.c
--- a/arch/i386/kernel/mpparse.c	Wed Oct  8 09:08:05 2003
+++ b/arch/i386/kernel/mpparse.c	Wed Oct  8 09:08:05 2003
@@ -9,12 +9,14 @@
  *		Erich Boleyn	:	MP v1.4 and additional changes.
  *		Alan Cox	:	Added EBDA scanning
  *		Ingo Molnar	:	various cleanups and rewrites
- *		Maciej W. Rozycki	:	Bits for default MP configurations
+ *		Maciej W.
Rozycki: Bits for default MP configurations + * Paul Diefenbaugh: Added full ACPI support */ #include #include #include +#include #include #include #include @@ -23,10 +25,12 @@ #include #include +#include #include #include #include #include +#include /* Have we found an MP table */ int smp_found_config; @@ -127,12 +131,6 @@ return n; } -#ifdef CONFIG_X86_IO_APIC -extern int have_acpi_tables; /* set by acpitable.c */ -#else -#define have_acpi_tables (0) -#endif - /* * Have to match translation table entries to main table entries by counter * hence the mpc_record variable .... can't see a less disgusting way of @@ -226,7 +224,7 @@ if (m->mpc_apicid > MAX_APICS) { printk("Processor #%d INVALID. (Max ID: %d).\n", m->mpc_apicid, MAX_APICS); - --num_processors; + --num_processors; return; } ver = m->mpc_apicver; @@ -433,10 +431,11 @@ printk("APIC at: 0x%lX\n",mpc->mpc_lapic); - /* save the local APIC address, it might be non-default, - * but only if we're not using the ACPI tables + /* + * Save the local APIC address (it might be non-default) -- but only + * if we're not using ACPI. */ - if (!have_acpi_tables) + if (!acpi_lapic) mp_lapic_addr = mpc->mpc_lapic; if ((clustered_apic_mode == CLUSTERED_APIC_NUMAQ) && mpc->mpc_oemptr) { @@ -455,9 +454,8 @@ { struct mpc_config_processor *m= (struct mpc_config_processor *)mpt; - - /* ACPI may already have provided this one for us */ - if (!have_acpi_tables) + /* ACPI may have already provided this data */ + if (!acpi_lapic) MP_processor_info(m); mpt += sizeof(*m); count += sizeof(*m); @@ -676,7 +674,6 @@ } static struct intel_mp_floating *mpf_found; -extern void config_acpi_tables(void); /* * Scan the memory blocks for an SMP configuration block. @@ -685,17 +682,19 @@ { struct intel_mp_floating *mpf = mpf_found; -#ifdef CONFIG_X86_IO_APIC /* - * Check if the ACPI tables are provided. Use them only to get - * the processor information, mainly because it provides - * the info on the logical processor(s), rather than the physical - * processor(s) that are provided by the MPS. We attempt to - * check only if the user provided a commandline override + * ACPI may be used to obtain the entire SMP configuration or just to + * enumerate/configure processors (CONFIG_ACPI_HT_ONLY). Note that + * ACPI supports both logical (e.g. Hyper-Threading) and physical + * processors, where MPS only supports physical. */ - config_acpi_tables(); -#endif - + if (acpi_lapic && acpi_ioapic) { + printk(KERN_INFO "Using ACPI (MADT) for SMP configuration information\n"); + return; + } + else if (acpi_lapic) + printk(KERN_INFO "Using ACPI for processor (LAPIC) configuration information\n"); + printk("Intel MultiProcessor Specification v1.%d\n", mpf->mpf_specification); if (mpf->mpf_feature2 & (1<<7)) { printk(" IMCR and PIC compatibility mode.\n"); @@ -813,11 +812,13 @@ * trustworthy, simply because the SMP table may have been * stomped on during early boot. These loaders are buggy and * should be fixed. + * + * MP1.4 SPEC states to only scan first 1K of 4K EBDA. 
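+	 * For example, if the word at 0x40E holds the segment 0x9FC0, the
+	 * EBDA sits at 0x9FC0 << 4 == 0x9FC00 and only 0x9FC00-0x9FFFF (1K)
+	 * is passed to smp_scan_config() below.  (The segment value is
+	 * machine-specific; this one is just an illustration.)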
 */
 	address = *(unsigned short *)phys_to_virt(0x40E);
 	address <<= 4;
-	smp_scan_config(address, 0x1000);
+	smp_scan_config(address, 0x400);
 	if (smp_found_config)
 		printk(KERN_WARNING "WARNING: MP table in the EBDA can be UNSAFE, contact linux-smp@vger.kernel.org if you experience SMP problems!\n");
 }
@@ -856,3 +857,315 @@
 #endif
 }
+
+/* --------------------------------------------------------------------------
+                            ACPI-based MP Configuration
+   -------------------------------------------------------------------------- */
+
+#ifdef CONFIG_ACPI_BOOT
+
+void __init mp_register_lapic_address (
+	u64			address)
+{
+	mp_lapic_addr = (unsigned long) address;
+
+	set_fixmap_nocache(FIX_APIC_BASE, mp_lapic_addr);
+
+	if (boot_cpu_physical_apicid == -1U)
+		boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));
+
+	Dprintk("Boot CPU = %d\n", boot_cpu_physical_apicid);
+}
+
+
+void __init mp_register_lapic (
+	u8			id,
+	u8			enabled)
+{
+	struct mpc_config_processor processor;
+	int			boot_cpu = 0;
+
+	if (id >= MAX_APICS) {
+		printk(KERN_WARNING "Processor #%d invalid (max %d)\n",
+			id, MAX_APICS);
+		return;
+	}
+
+	if (id == boot_cpu_physical_apicid)
+		boot_cpu = 1;
+
+	processor.mpc_type = MP_PROCESSOR;
+	processor.mpc_apicid = id;
+	processor.mpc_apicver = 0x10;	/* TBD: lapic version */
+	processor.mpc_cpuflag = (enabled ? CPU_ENABLED : 0);
+	processor.mpc_cpuflag |= (boot_cpu ? CPU_BOOTPROCESSOR : 0);
+	processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) |
+		(boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_mask;
+	processor.mpc_featureflag = boot_cpu_data.x86_capability[0];
+	processor.mpc_reserved[0] = 0;
+	processor.mpc_reserved[1] = 0;
+
+	MP_processor_info(&processor);
+}
+
+#ifdef CONFIG_X86_IO_APIC
+
+#define MP_ISA_BUS		0
+#define MP_MAX_IOAPIC_PIN	127
+
+struct mp_ioapic_routing {
+	int			apic_id;
+	int			irq_start;
+	int			irq_end;
+	u32			pin_programmed[4];
+} mp_ioapic_routing[MAX_IO_APICS];
+
+
+static int __init mp_find_ioapic (
+	int			irq)
+{
+	int			i = 0;
+
+	/* Find the IOAPIC that manages this IRQ. */
+	for (i = 0; i < nr_ioapics; i++) {
+		if ((irq >= mp_ioapic_routing[i].irq_start)
+			&& (irq <= mp_ioapic_routing[i].irq_end))
+			return i;
+	}
+
+	printk(KERN_ERR "ERROR: Unable to locate IOAPIC for IRQ %d\n", irq);
+
+	return -1;
+}
+
+
+void __init mp_register_ioapic (
+	u8			id,
+	u32			address,
+	u32			irq_base)
+{
+	int			idx = 0;
+
+	if (nr_ioapics >= MAX_IO_APICS) {
+		printk(KERN_ERR "ERROR: Max # of I/O APICs (%d) exceeded "
+			"(found %d)\n", MAX_IO_APICS, nr_ioapics);
+		panic("Recompile kernel with bigger MAX_IO_APICS!\n");
+	}
+	if (!address) {
+		printk(KERN_ERR "WARNING: Bogus (zero) I/O APIC address"
+			" found in MADT table, skipping!\n");
+		return;
+	}
+
+	idx = nr_ioapics++;
+
+	mp_ioapics[idx].mpc_type = MP_IOAPIC;
+	mp_ioapics[idx].mpc_flags = MPC_APIC_USABLE;
+	mp_ioapics[idx].mpc_apicaddr = address;
+
+	set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
+	mp_ioapics[idx].mpc_apicid = io_apic_get_unique_id(idx, id);
+	mp_ioapics[idx].mpc_apicver = io_apic_get_version(idx);
+
+	/*
+	 * Build basic IRQ lookup table to facilitate irq->io_apic lookups
+	 * and to prevent reprogramming of IOAPIC pins (PCI IRQs).
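+	 *
+	 * For example (hypothetical layout): an IOAPIC registered with
+	 * irq_base 0 and 24 redirection entries covers IRQs 0-23, so a
+	 * second IOAPIC registered at irq_base 24 picks up IRQ 24 onward,
+	 * and mp_find_ioapic(25) above returns the second index.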
+ */ + mp_ioapic_routing[idx].apic_id = mp_ioapics[idx].mpc_apicid; + mp_ioapic_routing[idx].irq_start = irq_base; + mp_ioapic_routing[idx].irq_end = irq_base + + io_apic_get_redir_entries(idx); + + printk("IOAPIC[%d]: apic_id %d, version %d, address 0x%lx, " + "IRQ %d-%d\n", idx, mp_ioapics[idx].mpc_apicid, + mp_ioapics[idx].mpc_apicver, mp_ioapics[idx].mpc_apicaddr, + mp_ioapic_routing[idx].irq_start, + mp_ioapic_routing[idx].irq_end); + + return; +} + + +void __init mp_override_legacy_irq ( + u8 bus_irq, + u8 polarity, + u8 trigger, + u32 global_irq) +{ + struct mpc_config_intsrc intsrc; + int i = 0; + int found = 0; + int ioapic = -1; + int pin = -1; + + /* + * Convert 'global_irq' to 'ioapic.pin'. + */ + ioapic = mp_find_ioapic(global_irq); + if (ioapic < 0) + return; + pin = global_irq - mp_ioapic_routing[ioapic].irq_start; + + /* + * TBD: This check is for faulty timer entries, where the override + * erroneously sets the trigger to level, resulting in a HUGE + * increase of timer interrupts! + */ + if ((bus_irq == 0) && (global_irq == 2) && (trigger == 3)) + trigger = 1; + + intsrc.mpc_type = MP_INTSRC; + intsrc.mpc_irqtype = mp_INT; + intsrc.mpc_irqflag = (trigger << 2) | polarity; + intsrc.mpc_srcbus = MP_ISA_BUS; + intsrc.mpc_srcbusirq = bus_irq; /* IRQ */ + intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid; /* APIC ID */ + intsrc.mpc_dstirq = pin; /* INTIN# */ + + Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, %d-%d\n", + intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3, + (intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus, + intsrc.mpc_srcbusirq, intsrc.mpc_dstapic, intsrc.mpc_dstirq); + + /* + * If an existing [IOAPIC.PIN -> IRQ] routing entry exists we override it. + * Otherwise create a new entry (e.g. global_irq == 2). + */ + for (i = 0; i < mp_irq_entries; i++) { + if ((mp_irqs[i].mpc_dstapic == intsrc.mpc_dstapic) + && (mp_irqs[i].mpc_dstirq == intsrc.mpc_dstirq)) { + mp_irqs[i] = intsrc; + found = 1; + break; + } + } + if (!found) { + mp_irqs[mp_irq_entries] = intsrc; + if (++mp_irq_entries == MAX_IRQ_SOURCES) + panic("Max # of irq sources exceeded!\n"); + } + + return; +} + + +void __init mp_config_acpi_legacy_irqs (void) +{ + struct mpc_config_intsrc intsrc; + int i = 0; + int ioapic = -1; + + /* + * Fabricate the legacy ISA bus (bus #31). + */ + mp_bus_id_to_type[MP_ISA_BUS] = MP_BUS_ISA; + Dprintk("Bus #%d is ISA\n", MP_ISA_BUS); + + /* + * Locate the IOAPIC that manages the ISA IRQs (0-15). + */ + ioapic = mp_find_ioapic(0); + if (ioapic < 0) + return; + + intsrc.mpc_type = MP_INTSRC; + intsrc.mpc_irqflag = 0; /* Conforming */ + intsrc.mpc_srcbus = MP_ISA_BUS; + intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid; + + /* + * Use the default configuration for the IRQs 0-15. These may be + * overriden by (MADT) interrupt source override entries. + */ + for (i = 0; i < 16; i++) { + + if (i == 2) continue; /* Don't connect IRQ2 */ + + intsrc.mpc_irqtype = i ? 
mp_INT : mp_ExtINT;	/* 8259A to #0 */
+		intsrc.mpc_srcbusirq = i;		/* Identity mapped */
+		intsrc.mpc_dstirq = i;
+
+		Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, "
+			"%d-%d\n", intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3,
+			(intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus,
+			intsrc.mpc_srcbusirq, intsrc.mpc_dstapic,
+			intsrc.mpc_dstirq);
+
+		mp_irqs[mp_irq_entries] = intsrc;
+		if (++mp_irq_entries == MAX_IRQ_SOURCES)
+			panic("Max # of irq sources exceeded!\n");
+	}
+
+	return;
+}
+
+#endif /*CONFIG_X86_IO_APIC*/
+
+#ifdef CONFIG_ACPI_PCI
+
+void __init mp_parse_prt (void)
+{
+	struct list_head	*node = NULL;
+	struct acpi_prt_entry	*entry = NULL;
+	int			vector = 0;
+	int			ioapic = -1;
+	int			ioapic_pin = 0;
+	int			irq = 0;
+	int			idx, bit = 0;
+
+	/*
+	 * Parse through the PCI Interrupt Routing Table (PRT) and program
+	 * routing for all static (IOAPIC-direct) entries.
+	 */
+	list_for_each(node, &acpi_prt.entries) {
+		entry = list_entry(node, struct acpi_prt_entry, node);
+
+		/* We're only interested in static (non-link) entries. */
+		if (entry->link.handle)
+			continue;
+
+		irq = entry->link.index;
+		ioapic = mp_find_ioapic(irq);
+		if (ioapic < 0)
+			continue;
+		ioapic_pin = irq - mp_ioapic_routing[ioapic].irq_start;
+
+		/*
+		 * Avoid pin reprogramming.  PRTs typically include entries
+		 * with redundant pin->irq mappings (but unique PCI devices);
+		 * we only program the IOAPIC on the first.
+		 */
+		bit = ioapic_pin % 32;
+		idx = (ioapic_pin < 32) ? 0 : (ioapic_pin / 32);
+		if (idx > 3) {
+			printk(KERN_ERR "Invalid reference to IOAPIC pin "
+				"%d-%d\n", mp_ioapic_routing[ioapic].apic_id,
+				ioapic_pin);
+			continue;
+		}
+		if ((1<<bit) & mp_ioapic_routing[ioapic].pin_programmed[idx]) {
+			entry->irq = irq;
+			continue;
+		}
+
+		mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1<<bit);
+
+		vector = io_apic_set_pci_routing(ioapic, ioapic_pin, irq);
+		entry->irq = irq;
+
+		printk(KERN_DEBUG "%02x:%02x:%02x[%c] -> %d-%d -> vector 0x%02x"
+			" -> IRQ %d\n", entry->id.segment, entry->id.bus,
+			entry->id.device, ('A' + entry->pin),
+			mp_ioapic_routing[ioapic].apic_id, ioapic_pin, vector,
+			entry->irq);
+	}
+
+	return;
+}
+
+#endif /*CONFIG_ACPI_PCI*/
+
+#endif /*CONFIG_ACPI_BOOT*/
diff -Nru a/arch/i386/kernel/pci-irq.c b/arch/i386/kernel/pci-irq.c
--- a/arch/i386/kernel/pci-irq.c	Wed Oct  8 09:08:05 2003
+++ b/arch/i386/kernel/pci-irq.c	Wed Oct  8 09:08:05 2003
@@ -115,7 +115,7 @@
  * Code for querying and setting of IRQ routes on various interrupt routers.
  */
 
-static void eisa_set_level_irq(unsigned int irq)
+void eisa_set_level_irq(unsigned int irq)
 {
 	unsigned char mask = 1 << (irq & 7);
 	unsigned int port = 0x4d0 + (irq >> 3);
diff -Nru a/arch/i386/kernel/pci-pc.c b/arch/i386/kernel/pci-pc.c
--- a/arch/i386/kernel/pci-pc.c	Wed Oct  8 09:08:05 2003
+++ b/arch/i386/kernel/pci-pc.c	Wed Oct  8 09:08:05 2003
@@ -11,6 +11,7 @@
 #include
 #include
 #include
+#include
 
 #include
 #include
@@ -38,6 +39,8 @@
 #define QUADLOCAL2BUS(quad,local) (local)
 #endif
 
+static int pci_using_acpi_prt = 0;
+
 /*
  * This interrupt-safe spinlock protects all accesses to PCI
  * configuration space.
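A worked example of the pin_programmed bookkeeping in mp_parse_prt() above, with an assumed pin number:

	/*
	 * ioapic_pin = 35:  idx = 35 / 32 = 1, bit = 35 % 32 = 3, so the
	 * pin is recorded in pin_programmed[1], bit 3.  The four 32-bit
	 * words cover MP_MAX_IOAPIC_PIN + 1 = 128 possible pins.
	 */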
@@ -1353,6 +1356,23 @@ pci_read_bridge_bases(b); } +struct pci_bus * __devinit pcibios_scan_root(int busnum) +{ + struct list_head *list; + struct pci_bus *bus; + + list_for_each(list, &pci_root_buses) { + bus = pci_bus_b(list); + if (bus->number == busnum) { + /* Already scanned */ + return bus; + } + } + + printk("PCI: Probing PCI hardware (bus %02x)\n", busnum); + + return pci_scan_bus(busnum, pci_root_ops, NULL); +} void __devinit pcibios_config_init(void) { @@ -1405,20 +1425,26 @@ return; } - printk(KERN_INFO "PCI: Probing PCI hardware\n"); - pci_root_bus = pci_scan_bus(0, pci_root_ops, NULL); - if (clustered_apic_mode && (numnodes > 1)) { - for (quad = 1; quad < numnodes; ++quad) { - printk("Scanning PCI bus %d for quad %d\n", - QUADLOCAL2BUS(quad,0), quad); - pci_scan_bus(QUADLOCAL2BUS(quad,0), - pci_root_ops, NULL); +#ifdef CONFIG_ACPI_PCI + if (acpi_pci_irq_init() > 0) + pci_using_acpi_prt = 1; +#endif + if (!pci_using_acpi_prt) { + pci_root_bus = pci_scan_bus(0, pci_root_ops, NULL); + if (clustered_apic_mode && (numnodes > 1)) { + for (quad = 1; quad < numnodes; ++quad) { + printk("Scanning PCI bus %d for quad %d\n", + QUADLOCAL2BUS(quad,0), quad); + pci_scan_bus(QUADLOCAL2BUS(quad,0), + pci_root_ops, NULL); + } } + + pcibios_irq_init(); + pcibios_fixup_peer_bridges(); + pcibios_fixup_irqs(); } - pcibios_irq_init(); - pcibios_fixup_peer_bridges(); - pcibios_fixup_irqs(); pcibios_resource_survey(); #ifdef CONFIG_PCI_BIOS @@ -1485,6 +1511,15 @@ if ((err = pcibios_enable_resources(dev, mask)) < 0) return err; + +#ifdef CONFIG_ACPI_PCI + if (pci_using_acpi_prt) { + acpi_pci_irq_enable(dev); + return 0; + } +#endif + pcibios_enable_irq(dev); + return 0; } diff -Nru a/drivers/char/Config.in b/drivers/char/Config.in --- a/drivers/char/Config.in Wed Oct 8 09:08:05 2003 +++ b/drivers/char/Config.in Wed Oct 8 09:08:05 2003 @@ -288,6 +288,7 @@ bool ' ALI chipset support' CONFIG_AGP_ALI bool ' Serverworks LE/HE support' CONFIG_AGP_SWORKS if [ "$CONFIG_IA64" = "y" ]; then + bool ' Intel 460GX support' CONFIG_AGP_I460 bool ' HP ZX1 AGP support' CONFIG_AGP_HP_ZX1 fi fi diff -Nru a/drivers/char/agp/agpgart_be.c b/drivers/char/agp/agpgart_be.c --- a/drivers/char/agp/agpgart_be.c Wed Oct 8 09:08:05 2003 +++ b/drivers/char/agp/agpgart_be.c Wed Oct 8 09:08:05 2003 @@ -43,6 +43,9 @@ #include #include #include +#include +#include +#include #include #include "agp.h" @@ -207,11 +210,14 @@ agp_bridge.free_by_type(curr); return; } - if (curr->page_count != 0) { - for (i = 0; i < curr->page_count; i++) { - curr->memory[i] &= ~(0x00000fff); - agp_bridge.agp_destroy_page((unsigned long) - phys_to_virt(curr->memory[i])); + if (agp_bridge.cant_use_aperture) { + vfree(curr->vmptr); + } else { + if (curr->page_count != 0) { + for (i = 0; i < curr->page_count; i++) { + agp_bridge.agp_destroy_page((unsigned long) + phys_to_virt(curr->memory[i])); + } } } agp_free_key(curr->key); @@ -220,6 +226,37 @@ MOD_DEC_USE_COUNT; } +#define IN_VMALLOC(_x) (((_x) >= VMALLOC_START) && ((_x) < VMALLOC_END)) + +/* + * Look up and return the pte corresponding to addr. We only do this for + * agp_ioremap'ed addresses. 
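+ *
+ * A sketch of the intended use (it mirrors agp_allocate_memory() below):
+ *
+ *	pte_t *pte = agp_lookup_pte(vaddr);
+ *	if (pte)
+ *		paddr = virt_to_phys(page_address(pte_page(*pte)));
+ *
+ * i.e. each page of a __vmalloc'ed block is translated back to the
+ * physical page backing it, since those pages are not contiguous.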
+ */ +static pte_t *agp_lookup_pte(unsigned long addr) +{ + pgd_t *dir; + pmd_t *pmd; + pte_t *pte; + + if (!IN_VMALLOC(addr)) + return NULL; + + dir = pgd_offset_k(addr); + pmd = pmd_offset(dir, addr); + + if (pmd) { + pte = pte_offset(pmd, addr); + + if (pte) { + return pte; + } else { + return NULL; + } + } else { + return NULL; + } +} + #define ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(unsigned long)) agp_memory *agp_allocate_memory(size_t page_count, u32 type) @@ -254,21 +291,44 @@ MOD_DEC_USE_COUNT; return NULL; } - for (i = 0; i < page_count; i++) { - new->memory[i] = agp_bridge.agp_alloc_page(); - if (new->memory[i] == 0) { - /* Free this structure */ - agp_free_memory(new); + if (agp_bridge.cant_use_aperture) { + void *vmblock; + unsigned long vaddr; + pte_t *pte; + + vmblock = __vmalloc(page_count << PAGE_SHIFT, GFP_KERNEL, PAGE_KERNEL); + if (vmblock == NULL) { + MOD_DEC_USE_COUNT; return NULL; } - new->memory[i] = - agp_bridge.mask_memory( - virt_to_phys((void *) new->memory[i]), - type); - new->page_count++; - } + new->vmptr = vmblock; + vaddr = (unsigned long) vmblock; + + for (i = 0; i < page_count; i++, vaddr += PAGE_SIZE) { + pte = agp_lookup_pte(vaddr); + if (pte == NULL) { + MOD_DEC_USE_COUNT; + return NULL; + } + new->memory[i] = virt_to_phys(page_address(pte_page(*pte))); + } + + new->page_count = page_count; + } else { + for (i = 0; i < page_count; i++) { + new->memory[i] = agp_bridge.agp_alloc_page(); + + if (new->memory[i] == 0) { + /* Free this structure */ + agp_free_memory(new); + return NULL; + } + new->memory[i] = virt_to_phys((void *) new->memory[i]); + new->page_count++; + } + } return new; } @@ -311,9 +371,6 @@ int agp_copy_info(agp_kern_info * info) { - unsigned long page_mask = 0; - int i; - memset(info, 0, sizeof(agp_kern_info)); if (agp_bridge.type == NOT_SUPPORTED) { info->chipset = agp_bridge.type; @@ -329,11 +386,7 @@ info->max_memory = agp_bridge.max_memory_agp; info->current_memory = atomic_read(&agp_bridge.current_memory_agp); info->cant_use_aperture = agp_bridge.cant_use_aperture; - - for(i = 0; i < agp_bridge.num_of_masks; i++) - page_mask |= agp_bridge.mask_memory(page_mask, i); - - info->page_mask = ~page_mask; + info->page_mask = ~0UL; return 0; } @@ -731,7 +784,8 @@ mem->is_flushed = TRUE; } for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { - agp_bridge.gatt_table[j] = mem->memory[i]; + agp_bridge.gatt_table[j] = + agp_bridge.mask_memory(mem->memory[i], mem->type); } agp_bridge.tlb_flush(mem); @@ -976,7 +1030,8 @@ CACHE_FLUSH(); for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { OUTREG32(intel_i810_private.registers, - I810_PTE_BASE + (j * 4), mem->memory[i]); + I810_PTE_BASE + (j * 4), + agp_bridge.mask_memory(mem->memory[i], mem->type)); } CACHE_FLUSH(); @@ -1042,10 +1097,7 @@ agp_free_memory(new); return NULL; } - new->memory[0] = - agp_bridge.mask_memory( - virt_to_phys((void *) new->memory[0]), - type); + new->memory[0] = virt_to_phys((void *) new->memory[0]); new->page_count = 1; new->num_scratch_pages = 1; new->type = AGP_PHYS_MEMORY; @@ -1079,7 +1131,6 @@ intel_i810_private.i810_dev = i810_dev; agp_bridge.masks = intel_i810_masks; - agp_bridge.num_of_masks = 2; agp_bridge.aperture_sizes = (void *) intel_i810_sizes; agp_bridge.size_type = FIXED_APER_SIZE; agp_bridge.num_aperture_sizes = 2; @@ -1282,7 +1333,8 @@ CACHE_FLUSH(); for (i = 0, j = pg_start; i < mem->page_count; i++, j++) - OUTREG32(intel_i830_private.registers,I810_PTE_BASE + (j * 4),mem->memory[i]); + OUTREG32(intel_i830_private.registers,I810_PTE_BASE + (j * 
4),
+		agp_bridge.mask_memory(mem->memory[i], mem->type));
 
 	CACHE_FLUSH();
 
@@ -1343,7 +1395,7 @@
 		return(NULL);
 	}
 
-	nw->memory[0] = agp_bridge.mask_memory(virt_to_phys((void *) nw->memory[0]),type);
+	nw->memory[0] = virt_to_phys((void *) nw->memory[0]);
 	nw->page_count = 1;
 	nw->num_scratch_pages = 1;
 	nw->type = AGP_PHYS_MEMORY;
@@ -1359,7 +1411,6 @@
 	intel_i830_private.i830_dev = i830_dev;
 
 	agp_bridge.masks = intel_i810_masks;
-	agp_bridge.num_of_masks = 3;
 	agp_bridge.aperture_sizes = (void *) intel_i830_sizes;
 	agp_bridge.size_type = FIXED_APER_SIZE;
 	agp_bridge.num_aperture_sizes = 2;
@@ -1394,9 +1445,605 @@
 
 #endif /* CONFIG_AGP_I810 */
 
-
 #ifdef CONFIG_AGP_INTEL
+#ifdef CONFIG_AGP_I460
 
-#endif /* CONFIG_AGP_I810 */
+/* BIOS configures the chipset so that one of two apbase registers is used */
+static u8 intel_i460_dynamic_apbase = 0x10;
+
+/* 460 supports multiple GART page sizes, so GART pageshift is dynamic */
+static u8 intel_i460_pageshift = 12;
+static u32 intel_i460_pagesize;
+
+/* Keep track of which is larger, chipset or kernel page size. */
+static u32 intel_i460_cpk = 1;
+
+/* Structure for tracking partial use of 4MB GART pages */
+static u32 **i460_pg_detail = NULL;
+static u32 *i460_pg_count = NULL;
+
+#define I460_CPAGES_PER_KPAGE (PAGE_SIZE >> intel_i460_pageshift)
+#define I460_KPAGES_PER_CPAGE ((1 << intel_i460_pageshift) >> PAGE_SHIFT)
+
+#define I460_SRAM_IO_DISABLE		(1 << 4)
+#define I460_BAPBASE_ENABLE		(1 << 3)
+#define I460_AGPSIZ_MASK		0x7
+#define I460_4M_PS			(1 << 1)
+
+#define log2(x)				ffz(~(x))
+
+static gatt_mask intel_i460_masks[] =
+{
+	{ INTEL_I460_GATT_VALID | INTEL_I460_GATT_COHERENT, 0 }
+};
+
+static unsigned long intel_i460_mask_memory(unsigned long addr, int type)
+{
+	/* Make sure the returned address is a valid GATT entry */
+	return (agp_bridge.masks[0].mask | (((addr &
+		~((1 << intel_i460_pageshift) - 1)) & 0xffffff000) >> 12));
+}
+
+static unsigned long intel_i460_unmask_memory(unsigned long addr)
+{
+	/* Turn a GATT entry into a physical address */
+	return ((addr & 0xffffff) << 12);
+}
+
+static int intel_i460_fetch_size(void)
+{
+	int i;
+	u8 temp;
+	aper_size_info_8 *values;
+
+	/* Determine the GART page size */
+	pci_read_config_byte(agp_bridge.dev, INTEL_I460_GXBCTL, &temp);
+	intel_i460_pageshift = (temp & I460_4M_PS) ? 22 : 12;
+	intel_i460_pagesize = 1UL << intel_i460_pageshift;
+
+	values = A_SIZE_8(agp_bridge.aperture_sizes);
+
+	pci_read_config_byte(agp_bridge.dev, INTEL_I460_AGPSIZ, &temp);
+
+	/* Exit now if the IO drivers for the GART SRAMS are turned off */
+	if (temp & I460_SRAM_IO_DISABLE) {
+		printk(KERN_WARNING PFX "GART SRAMS disabled on 460GX chipset\n");
+		printk(KERN_WARNING PFX "AGPGART operation not possible\n");
+		return 0;
+	}
+
+	/* Make sure we don't try to create a 2^23 entry GATT */
+	if ((intel_i460_pageshift == 0) && ((temp & I460_AGPSIZ_MASK) == 4)) {
+		printk(KERN_WARNING PFX "We can't have a 32GB aperture with 4KB"
+			" GART pages\n");
+		return 0;
+	}
+
+	/* Determine the proper APBASE register */
+	if (temp & I460_BAPBASE_ENABLE)
+		intel_i460_dynamic_apbase = INTEL_I460_BAPBASE;
+	else
+		intel_i460_dynamic_apbase = INTEL_I460_APBASE;
+
+	for (i = 0; i < agp_bridge.num_aperture_sizes; i++) {
+
+		/*
+		 * Dynamically calculate the proper num_entries and page_order
+		 * values for the defined aperture sizes.  Take care not to
+		 * shift off the end of values[i].size.
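+		 *
+		 * Worked example (sizes from intel_i460_sizes[] below): with
+		 * 4MB GART pages (pageshift 22) the 32768MB aperture gives
+		 * num_entries = (32768 << 8) >> 10 = 8192 entries, and
+		 * page_order = log2((sizeof(u32) * 8192) >> PAGE_SHIFT)
+		 *            = log2(8) = 3 with 4KB kernel pages.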
+ */ + values[i].num_entries = (values[i].size << 8) >> + (intel_i460_pageshift - 12); + values[i].page_order = log2((sizeof(u32)*values[i].num_entries) + >> PAGE_SHIFT); + } + + for (i = 0; i < agp_bridge.num_aperture_sizes; i++) { + /* Neglect control bits when matching up size_value */ + if ((temp & I460_AGPSIZ_MASK) == values[i].size_value) { + agp_bridge.previous_size = + agp_bridge.current_size = (void *) (values + i); + agp_bridge.aperture_size_idx = i; + return values[i].size; + } + } + + return 0; +} + +/* There isn't anything to do here since 460 has no GART TLB. */ +static void intel_i460_tlb_flush(agp_memory * mem) +{ + return; +} + +/* + * This utility function is needed to prevent corruption of the control bits + * which are stored along with the aperture size in 460's AGPSIZ register + */ +static void intel_i460_write_agpsiz(u8 size_value) +{ + u8 temp; + + pci_read_config_byte(agp_bridge.dev, INTEL_I460_AGPSIZ, &temp); + pci_write_config_byte(agp_bridge.dev, INTEL_I460_AGPSIZ, + ((temp & ~I460_AGPSIZ_MASK) | size_value)); +} + +static void intel_i460_cleanup(void) +{ + aper_size_info_8 *previous_size; + + previous_size = A_SIZE_8(agp_bridge.previous_size); + intel_i460_write_agpsiz(previous_size->size_value); + + if (intel_i460_cpk == 0) { + vfree(i460_pg_detail); + vfree(i460_pg_count); + } +} + + +/* Control bits for Out-Of-GART coherency and Burst Write Combining */ +#define I460_GXBCTL_OOG (1UL << 0) +#define I460_GXBCTL_BWC (1UL << 2) + +static int intel_i460_configure(void) +{ + union { + u32 small[2]; + u64 large; + } temp; + u8 scratch; + int i; + + aper_size_info_8 *current_size; + + temp.large = 0; + + current_size = A_SIZE_8(agp_bridge.current_size); + intel_i460_write_agpsiz(current_size->size_value); + + /* + * Do the necessary rigmarole to read all eight bytes of APBASE. + * This has to be done since the AGP aperture can be above 4GB on + * 460 based systems. + */ + pci_read_config_dword(agp_bridge.dev, intel_i460_dynamic_apbase, + &(temp.small[0])); + pci_read_config_dword(agp_bridge.dev, intel_i460_dynamic_apbase + 4, + &(temp.small[1])); + + /* Clear BAR control bits */ + agp_bridge.gart_bus_addr = temp.large & ~((1UL << 3) - 1); + + pci_read_config_byte(agp_bridge.dev, INTEL_I460_GXBCTL, &scratch); + pci_write_config_byte(agp_bridge.dev, INTEL_I460_GXBCTL, + (scratch & 0x02) | I460_GXBCTL_OOG | I460_GXBCTL_BWC); + + /* + * Initialize partial allocation trackers if a GART page is bigger than + * a kernel page. + */ + if (I460_CPAGES_PER_KPAGE >= 1) { + intel_i460_cpk = 1; + } else { + intel_i460_cpk = 0; + + i460_pg_detail = (void *) vmalloc(sizeof(*i460_pg_detail) * + current_size->num_entries); + i460_pg_count = (void *) vmalloc(sizeof(*i460_pg_count) * + current_size->num_entries); + + for (i = 0; i < current_size->num_entries; i++) { + i460_pg_count[i] = 0; + i460_pg_detail[i] = NULL; + } + } + + return 0; +} + +static int intel_i460_create_gatt_table(void) { + + char *table; + int i; + int page_order; + int num_entries; + void *temp; + unsigned int read_back; + + /* + * Load up the fixed address of the GART SRAMS which hold our + * GATT table. 
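+	 * Unlike chipsets whose GATT lives in ordinary RAM, the 460 keeps
+	 * it in on-chip SRAM at the fixed physical address INTEL_I460_ATTBASE;
+	 * ioremap_nocache() below gives an uncached mapping so entry writes
+	 * are not held back in the CPU cache.  For example, the 1024MB
+	 * aperture with 4KB GART pages needs a PAGE_SIZE * (1 << page_order)
+	 * == 1MB mapping (262144 four-byte entries).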
+	 */
+	table = (char *) __va(INTEL_I460_ATTBASE);
+
+	temp = agp_bridge.current_size;
+	page_order = A_SIZE_8(temp)->page_order;
+	num_entries = A_SIZE_8(temp)->num_entries;
+
+	agp_bridge.gatt_table_real = (u32 *) table;
+	agp_bridge.gatt_table = ioremap_nocache(virt_to_phys(table),
+				(PAGE_SIZE * (1 << page_order)));
+	agp_bridge.gatt_bus_addr = virt_to_phys(agp_bridge.gatt_table_real);
+
+	for (i = 0; i < num_entries; i++) {
+		agp_bridge.gatt_table[i] = 0;
+	}
+
+	/*
+	 * The 460 spec says we have to read the last location written to
+	 * make sure that all writes have taken effect
+	 */
+	read_back = agp_bridge.gatt_table[i - 1];
+
+	return 0;
+}
+
+static int intel_i460_free_gatt_table(void)
+{
+	int num_entries;
+	int i;
+	void *temp;
+	unsigned int read_back;
+
+	temp = agp_bridge.current_size;
+
+	num_entries = A_SIZE_8(temp)->num_entries;
+
+	for (i = 0; i < num_entries; i++) {
+		agp_bridge.gatt_table[i] = 0;
+	}
+
+	/*
+	 * The 460 spec says we have to read the last location written to
+	 * make sure that all writes have taken effect
+	 */
+	read_back = agp_bridge.gatt_table[i - 1];
+
+	iounmap(agp_bridge.gatt_table);
+
+	return 0;
+}
+
+/* These functions are called when PAGE_SIZE exceeds the GART page size */
+
+static int intel_i460_insert_memory_cpk(agp_memory * mem,
+				off_t pg_start, int type)
+{
+	int i, j, k, num_entries;
+	void *temp;
+	unsigned long paddr;
+	unsigned int read_back;
+
+	/*
+	 * The rest of the kernel will compute page offsets in terms of
+	 * PAGE_SIZE.
+	 */
+	pg_start = I460_CPAGES_PER_KPAGE * pg_start;
+
+	temp = agp_bridge.current_size;
+	num_entries = A_SIZE_8(temp)->num_entries;
+
+	if ((pg_start + I460_CPAGES_PER_KPAGE * mem->page_count) > num_entries) {
+		printk(KERN_WARNING PFX "Looks like we're out of AGP memory\n");
+		return -EINVAL;
+	}
+
+	j = pg_start;
+	while (j < (pg_start + I460_CPAGES_PER_KPAGE * mem->page_count)) {
+		if (!PGE_EMPTY(agp_bridge.gatt_table[j])) {
+			return -EBUSY;
+		}
+		j++;
+	}
+
+	for (i = 0, j = pg_start; i < mem->page_count; i++) {
+
+		paddr = mem->memory[i];
+
+		for (k = 0; k < I460_CPAGES_PER_KPAGE; k++, j++, paddr += intel_i460_pagesize)
+			agp_bridge.gatt_table[j] = (unsigned int)
+				agp_bridge.mask_memory(paddr, mem->type);
+	}
+
+	/*
+	 * The 460 spec says we have to read the last location written to
+	 * make sure that all writes have taken effect
+	 */
+	read_back = agp_bridge.gatt_table[j - 1];
+
+	return 0;
+}
+
+static int intel_i460_remove_memory_cpk(agp_memory * mem, off_t pg_start,
+				int type)
+{
+	int i;
+	unsigned int read_back;
+
+	pg_start = I460_CPAGES_PER_KPAGE * pg_start;
+
+	for (i = pg_start; i < (pg_start + I460_CPAGES_PER_KPAGE *
+				mem->page_count); i++)
+		agp_bridge.gatt_table[i] = 0;
+
+	/*
+	 * The 460 spec says we have to read the last location written to
+	 * make sure that all writes have taken effect
+	 */
+	read_back = agp_bridge.gatt_table[i - 1];
+
+	return 0;
+}
+
+/*
+ * These functions are called when the GART page size exceeds PAGE_SIZE.
+ *
+ * This situation is interesting since AGP memory allocations that are
+ * smaller than a single GART page are possible.  The structures i460_pg_count
+ * and i460_pg_detail track partial allocation of the large GART pages to
+ * work around this issue.
+ *
+ * i460_pg_count[pg_num] tracks the number of kernel pages in use within
+ * GART page pg_num.  i460_pg_detail[pg_num] is an array containing a
+ * pseudo-GART entry for each of the aforementioned kernel pages.
The whole + * of i460_pg_detail is equivalent to a giant GATT with page size equal to + * that of the kernel. + */ + +static void *intel_i460_alloc_large_page(int pg_num) +{ + int i; + void *bp, *bp_end; + struct page *page; + + i460_pg_detail[pg_num] = (void *) vmalloc(sizeof(u32) * + I460_KPAGES_PER_CPAGE); + if (i460_pg_detail[pg_num] == NULL) { + printk(KERN_WARNING PFX "Out of memory, we're in trouble...\n"); + return NULL; + } + + for (i = 0; i < I460_KPAGES_PER_CPAGE; i++) + i460_pg_detail[pg_num][i] = 0; + + bp = (void *) __get_free_pages(GFP_KERNEL, + intel_i460_pageshift - PAGE_SHIFT); + if (bp == NULL) { + printk(KERN_WARNING PFX "Couldn't alloc 4M GART page...\n"); + return NULL; + } + + bp_end = bp + ((PAGE_SIZE * + (1 << (intel_i460_pageshift - PAGE_SHIFT))) - 1); + + for (page = virt_to_page(bp); page <= virt_to_page(bp_end); page++) { + atomic_inc(&page->count); + set_bit(PG_locked, &page->flags); + atomic_inc(&agp_bridge.current_memory_agp); + } + + return bp; +} + +static void intel_i460_free_large_page(int pg_num, unsigned long addr) +{ + struct page *page; + void *bp, *bp_end; + + bp = (void *) __va(addr); + bp_end = bp + (PAGE_SIZE * + (1 << (intel_i460_pageshift - PAGE_SHIFT))); + + vfree(i460_pg_detail[pg_num]); + i460_pg_detail[pg_num] = NULL; + + for (page = virt_to_page(bp); page < virt_to_page(bp_end); page++) { + put_page(page); + UnlockPage(page); + atomic_dec(&agp_bridge.current_memory_agp); + } + + free_pages((unsigned long) bp, intel_i460_pageshift - PAGE_SHIFT); +} + +static int intel_i460_insert_memory_kpc(agp_memory * mem, + off_t pg_start, int type) +{ + int i, pg, start_pg, end_pg, start_offset, end_offset, idx; + int num_entries; + void *temp; + unsigned int read_back; + unsigned long paddr; + + temp = agp_bridge.current_size; + num_entries = A_SIZE_8(temp)->num_entries; + + /* Figure out what pg_start means in terms of our large GART pages */ + start_pg = pg_start / I460_KPAGES_PER_CPAGE; + start_offset = pg_start % I460_KPAGES_PER_CPAGE; + end_pg = (pg_start + mem->page_count - 1) / + I460_KPAGES_PER_CPAGE; + end_offset = (pg_start + mem->page_count - 1) % + I460_KPAGES_PER_CPAGE; + + if (end_pg > num_entries) { + printk(KERN_WARNING PFX "Looks like we're out of AGP memory\n"); + return -EINVAL; + } + + /* Check if the requested region of the aperture is free */ + for (pg = start_pg; pg <= end_pg; pg++) { + /* Allocate new GART pages if necessary */ + if (i460_pg_detail[pg] == NULL) { + temp = intel_i460_alloc_large_page(pg); + if (temp == NULL) + return -ENOMEM; + agp_bridge.gatt_table[pg] = agp_bridge.mask_memory( + (unsigned long) temp, 0); + read_back = agp_bridge.gatt_table[pg]; + } + + for (idx = ((pg == start_pg) ? start_offset : 0); + idx < ((pg == end_pg) ? (end_offset + 1) + : I460_KPAGES_PER_CPAGE); + idx++) { + if(i460_pg_detail[pg][idx] != 0) + return -EBUSY; + } + } + + for (pg = start_pg, i = 0; pg <= end_pg; pg++) { + paddr = intel_i460_unmask_memory(agp_bridge.gatt_table[pg]); + for (idx = ((pg == start_pg) ? start_offset : 0); + idx < ((pg == end_pg) ? 
(end_offset + 1)
+				       : I460_KPAGES_PER_CPAGE);
+		     idx++, i++) {
+			mem->memory[i] = paddr + (idx * PAGE_SIZE);
+			i460_pg_detail[pg][idx] =
+				agp_bridge.mask_memory(mem->memory[i], mem->type);
+
+			i460_pg_count[pg]++;
+		}
+	}
+
+	return 0;
+}
+
+static int intel_i460_remove_memory_kpc(agp_memory * mem,
+				off_t pg_start, int type)
+{
+	int i, pg, start_pg, end_pg, start_offset, end_offset, idx;
+	int num_entries;
+	void *temp;
+	unsigned int read_back;
+	unsigned long paddr;
+
+	temp = agp_bridge.current_size;
+	num_entries = A_SIZE_8(temp)->num_entries;
+
+	/* Figure out what pg_start means in terms of our large GART pages */
+	start_pg = pg_start / I460_KPAGES_PER_CPAGE;
+	start_offset = pg_start % I460_KPAGES_PER_CPAGE;
+	end_pg = (pg_start + mem->page_count - 1) /
+		I460_KPAGES_PER_CPAGE;
+	end_offset = (pg_start + mem->page_count - 1) %
+		I460_KPAGES_PER_CPAGE;
+
+	for (i = 0, pg = start_pg; pg <= end_pg; pg++) {
+		for (idx = ((pg == start_pg) ? start_offset : 0);
+		     idx < ((pg == end_pg) ? (end_offset + 1)
+					   : I460_KPAGES_PER_CPAGE);
+		     idx++, i++) {
+			mem->memory[i] = 0;
+			i460_pg_detail[pg][idx] = 0;
+			i460_pg_count[pg]--;
+		}
+
+		/* Free GART pages if they are unused */
+		if (i460_pg_count[pg] == 0) {
+			paddr = intel_i460_unmask_memory(agp_bridge.gatt_table[pg]);
+			agp_bridge.gatt_table[pg] = agp_bridge.scratch_page;
+			read_back = agp_bridge.gatt_table[pg];
+
+			intel_i460_free_large_page(pg, paddr);
+		}
+	}
+
+	return 0;
+}
+
+/* Dummy routines to call the appropriate {cpk,kpc} function */
+
+static int intel_i460_insert_memory(agp_memory * mem,
+				off_t pg_start, int type)
+{
+	if (intel_i460_cpk)
+		return intel_i460_insert_memory_cpk(mem, pg_start, type);
+	else
+		return intel_i460_insert_memory_kpc(mem, pg_start, type);
+}
+
+static int intel_i460_remove_memory(agp_memory * mem,
+				off_t pg_start, int type)
+{
+	if (intel_i460_cpk)
+		return intel_i460_remove_memory_cpk(mem, pg_start, type);
+	else
+		return intel_i460_remove_memory_kpc(mem, pg_start, type);
+}
+
+/*
+ * If the kernel page size is smaller than the chipset page size, we don't
+ * want to allocate memory until we know where it is to be bound in the
+ * aperture (a multi-kernel-page alloc might fit inside of an already
+ * allocated GART page).  Consequently, don't allocate or free anything
+ * if i460_cpk (meaning chipset pages per kernel page) isn't set.
+ *
+ * Let's just hope nobody counts on the allocated AGP memory being there
+ * before bind time (I don't think current drivers do)...
+ */
+static unsigned long intel_i460_alloc_page(void)
+{
+	if (intel_i460_cpk)
+		return agp_generic_alloc_page();
+
+	/* Returning NULL would cause problems */
+	return ((unsigned long) ~0UL);
+}
+
+static void intel_i460_destroy_page(unsigned long page)
+{
+	if (intel_i460_cpk)
+		agp_generic_destroy_page(page);
+}
+
+static aper_size_info_8 intel_i460_sizes[3] =
+{
+	/*
+	 * The 32GB aperture is only available with a 4M GART page size.
+	 * Due to the dynamic GART page size, we can't figure out page_order
+	 * or num_entries until runtime.
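+	 *
+	 * For illustration: with 4KB kernel pages, a 4M GART page gives
+	 * I460_KPAGES_PER_CPAGE = (1 << 22) >> 12 = 1024, so the {32768, 4}
+	 * entry below describes 8192 GART pages each backing 1024 kernel
+	 * pages, while the 4KB GART page entries map one-to-one.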
+ */ + {32768, 0, 0, 4}, + {1024, 0, 0, 2}, + {256, 0, 0, 1} +}; + +static int __init intel_i460_setup(struct pci_dev *pdev) +{ + agp_bridge.masks = intel_i460_masks; + agp_bridge.aperture_sizes = (void *) intel_i460_sizes; + agp_bridge.size_type = U8_APER_SIZE; + agp_bridge.num_aperture_sizes = 3; + agp_bridge.dev_private_data = NULL; + agp_bridge.needs_scratch_page = FALSE; + agp_bridge.configure = intel_i460_configure; + agp_bridge.fetch_size = intel_i460_fetch_size; + agp_bridge.cleanup = intel_i460_cleanup; + agp_bridge.tlb_flush = intel_i460_tlb_flush; + agp_bridge.mask_memory = intel_i460_mask_memory; + agp_bridge.agp_enable = agp_generic_agp_enable; + agp_bridge.cache_flush = global_cache_flush; + agp_bridge.create_gatt_table = intel_i460_create_gatt_table; + agp_bridge.free_gatt_table = intel_i460_free_gatt_table; + agp_bridge.insert_memory = intel_i460_insert_memory; + agp_bridge.remove_memory = intel_i460_remove_memory; + agp_bridge.alloc_by_type = agp_generic_alloc_by_type; + agp_bridge.free_by_type = agp_generic_free_by_type; + agp_bridge.agp_alloc_page = intel_i460_alloc_page; + agp_bridge.agp_destroy_page = intel_i460_destroy_page; + agp_bridge.suspend = agp_generic_suspend; + agp_bridge.resume = agp_generic_resume; + agp_bridge.cant_use_aperture = 1; + + return 0; + + (void) pdev; /* unused */ +} + +#endif /* CONFIG_AGP_I460 */ #ifdef CONFIG_AGP_INTEL @@ -1841,7 +2488,6 @@ static int __init intel_generic_setup (struct pci_dev *pdev) { agp_bridge.masks = intel_generic_masks; - agp_bridge.num_of_masks = 1; agp_bridge.aperture_sizes = (void *) intel_generic_sizes; agp_bridge.size_type = U16_APER_SIZE; agp_bridge.num_aperture_sizes = 7; @@ -1874,7 +2520,6 @@ static int __init intel_815_setup (struct pci_dev *pdev) { agp_bridge.masks = intel_generic_masks; - agp_bridge.num_of_masks = 1; agp_bridge.aperture_sizes = (void *) intel_815_sizes; agp_bridge.size_type = U8_APER_SIZE; agp_bridge.num_aperture_sizes = 2; @@ -1907,7 +2552,6 @@ static int __init intel_820_setup (struct pci_dev *pdev) { agp_bridge.masks = intel_generic_masks; - agp_bridge.num_of_masks = 1; agp_bridge.aperture_sizes = (void *) intel_8xx_sizes; agp_bridge.size_type = U8_APER_SIZE; agp_bridge.num_aperture_sizes = 7; @@ -1940,7 +2584,6 @@ static int __init intel_830mp_setup (struct pci_dev *pdev) { agp_bridge.masks = intel_generic_masks; - agp_bridge.num_of_masks = 1; agp_bridge.aperture_sizes = (void *) intel_830mp_sizes; agp_bridge.size_type = U8_APER_SIZE; agp_bridge.num_aperture_sizes = 4; @@ -1972,7 +2615,6 @@ static int __init intel_840_setup (struct pci_dev *pdev) { agp_bridge.masks = intel_generic_masks; - agp_bridge.num_of_masks = 1; agp_bridge.aperture_sizes = (void *) intel_8xx_sizes; agp_bridge.size_type = U8_APER_SIZE; agp_bridge.num_aperture_sizes = 7; @@ -2005,7 +2647,6 @@ static int __init intel_845_setup (struct pci_dev *pdev) { agp_bridge.masks = intel_generic_masks; - agp_bridge.num_of_masks = 1; agp_bridge.aperture_sizes = (void *) intel_8xx_sizes; agp_bridge.size_type = U8_APER_SIZE; agp_bridge.num_aperture_sizes = 7; @@ -2038,7 +2679,6 @@ static int __init intel_850_setup (struct pci_dev *pdev) { agp_bridge.masks = intel_generic_masks; - agp_bridge.num_of_masks = 1; agp_bridge.aperture_sizes = (void *) intel_8xx_sizes; agp_bridge.size_type = U8_APER_SIZE; agp_bridge.num_aperture_sizes = 7; @@ -2071,7 +2711,6 @@ static int __init intel_860_setup (struct pci_dev *pdev) { agp_bridge.masks = intel_generic_masks; - agp_bridge.num_of_masks = 1; agp_bridge.aperture_sizes = (void *) intel_8xx_sizes; 
agp_bridge.size_type = U8_APER_SIZE; agp_bridge.num_aperture_sizes = 7; @@ -2191,7 +2830,6 @@ static int __init via_generic_setup (struct pci_dev *pdev) { agp_bridge.masks = via_generic_masks; - agp_bridge.num_of_masks = 1; agp_bridge.aperture_sizes = (void *) via_generic_sizes; agp_bridge.size_type = U8_APER_SIZE; agp_bridge.num_aperture_sizes = 7; @@ -2305,7 +2943,6 @@ static int __init sis_generic_setup (struct pci_dev *pdev) { agp_bridge.masks = sis_generic_masks; - agp_bridge.num_of_masks = 1; agp_bridge.aperture_sizes = (void *) sis_generic_sizes; agp_bridge.size_type = U8_APER_SIZE; agp_bridge.num_aperture_sizes = 7; @@ -2643,7 +3280,8 @@ for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { addr = (j * PAGE_SIZE) + agp_bridge.gart_bus_addr; cur_gatt = GET_GATT(addr); - cur_gatt[GET_GATT_OFF(addr)] = mem->memory[i]; + cur_gatt[GET_GATT_OFF(addr)] = + agp_bridge.mask_memory(mem->memory[i], mem->type); } agp_bridge.tlb_flush(mem); return 0; @@ -2689,7 +3327,6 @@ static int __init amd_irongate_setup (struct pci_dev *pdev) { agp_bridge.masks = amd_irongate_masks; - agp_bridge.num_of_masks = 1; agp_bridge.aperture_sizes = (void *) amd_irongate_sizes; agp_bridge.size_type = LVL2_APER_SIZE; agp_bridge.num_aperture_sizes = 7; @@ -2790,7 +3427,7 @@ } for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { - addr = mem->memory[i]; + addr = agp_bridge.mask_memory(mem->memory[i], mem->type); tmp = addr; BUG_ON(tmp & 0xffffff0000000ffc); @@ -3155,7 +3792,6 @@ static int __init amd_8151_setup (struct pci_dev *pdev) { agp_bridge.masks = amd_8151_masks; - agp_bridge.num_of_masks = 1; agp_bridge.aperture_sizes = (void *) amd_8151_sizes; agp_bridge.size_type = U32_APER_SIZE; agp_bridge.num_aperture_sizes = 7; @@ -3393,7 +4029,6 @@ static int __init ali_generic_setup (struct pci_dev *pdev) { agp_bridge.masks = ali_generic_masks; - agp_bridge.num_of_masks = 1; agp_bridge.aperture_sizes = (void *) ali_generic_sizes; agp_bridge.size_type = U32_APER_SIZE; agp_bridge.num_aperture_sizes = 7; @@ -3807,7 +4442,8 @@ for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { addr = (j * PAGE_SIZE) + agp_bridge.gart_bus_addr; cur_gatt = SVRWRKS_GET_GATT(addr); - cur_gatt[GET_GATT_OFF(addr)] = mem->memory[i]; + cur_gatt[GET_GATT_OFF(addr)] = + agp_bridge.mask_memory(mem->memory[i], mem->type); } agp_bridge.tlb_flush(mem); return 0; @@ -3964,7 +4600,6 @@ serverworks_private.svrwrks_dev = pdev; agp_bridge.masks = serverworks_masks; - agp_bridge.num_of_masks = 1; agp_bridge.aperture_sizes = (void *) serverworks_sizes; agp_bridge.size_type = LVL2_APER_SIZE; agp_bridge.num_aperture_sizes = 7; @@ -4048,11 +4683,6 @@ {0, 0, 0}, /* filled in by hp_zx1_fetch_size() */ }; -static gatt_mask hp_zx1_masks[] = -{ - {HP_ZX1_PDIR_VALID_BIT, 0} -}; - static struct _hp_private { struct pci_dev *ioc; volatile u8 *registers; @@ -4118,7 +4748,7 @@ return 0; } -static int __init hp_zx1_ioc_owner(u8 ioc_rev) +static int __init hp_zx1_ioc_owner(void) { struct _hp_private *hp = &hp_private; @@ -4158,7 +4788,6 @@ struct _hp_private *hp = &hp_private; struct pci_dev *ioc; int i; - u8 ioc_rev; ioc = pci_find_device(PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_ZX1_IOC, NULL); if (!ioc) { @@ -4167,8 +4796,6 @@ } hp->ioc = ioc; - pci_read_config_byte(ioc, PCI_REVISION_ID, &ioc_rev); - for (i = 0; i < PCI_NUM_RESOURCES; i++) { if (pci_resource_flags(ioc, i) == IORESOURCE_MEM) { hp->registers = (u8 *) ioremap(pci_resource_start(ioc, @@ -4190,7 +4817,7 @@ hp->io_pdir_owner = (INREG64(hp->registers, HP_ZX1_IBASE) & 0x1) == 0; if (hp->io_pdir_owner) 
- return hp_zx1_ioc_owner(ioc_rev); + return hp_zx1_ioc_owner(); return hp_zx1_ioc_shared(); } @@ -4310,11 +4937,6 @@ j++; } - if (mem->is_flushed == FALSE) { - CACHE_FLUSH(); - mem->is_flushed = TRUE; - } - for (i = 0, j = io_pg_start; i < mem->page_count; i++) { unsigned long paddr; @@ -4354,15 +4976,8 @@ return HP_ZX1_PDIR_VALID_BIT | addr; } -static unsigned long hp_zx1_unmask_memory(unsigned long addr) -{ - return addr & ~(HP_ZX1_PDIR_VALID_BIT); -} - static int __init hp_zx1_setup (struct pci_dev *pdev) { - agp_bridge.masks = hp_zx1_masks; - agp_bridge.num_of_masks = 1; agp_bridge.dev_private_data = NULL; agp_bridge.size_type = FIXED_APER_SIZE; agp_bridge.needs_scratch_page = FALSE; @@ -4371,7 +4986,6 @@ agp_bridge.cleanup = hp_zx1_cleanup; agp_bridge.tlb_flush = hp_zx1_tlbflush; agp_bridge.mask_memory = hp_zx1_mask_memory; - agp_bridge.unmask_memory = hp_zx1_unmask_memory; agp_bridge.agp_enable = agp_generic_agp_enable; agp_bridge.cache_flush = global_cache_flush; agp_bridge.create_gatt_table = hp_zx1_create_gatt_table; @@ -4590,6 +5204,15 @@ #endif /* CONFIG_AGP_INTEL */ +#ifdef CONFIG_AGP_I460 + { PCI_DEVICE_ID_INTEL_460GX, + PCI_VENDOR_ID_INTEL, + INTEL_460GX, + "Intel", + "460GX", + intel_i460_setup }, +#endif + #ifdef CONFIG_AGP_SIS { PCI_DEVICE_ID_SI_740, PCI_VENDOR_ID_SI, @@ -4809,6 +5432,18 @@ return -ENODEV; } +static int agp_check_supported_device(struct pci_dev *dev) { + + int i; + + for (i = 0; i < ARRAY_SIZE (agp_bridge_info); i++) { + if (dev->vendor == agp_bridge_info[i].vendor_id && + dev->device == agp_bridge_info[i].device_id) + return 1; + } + + return 0; +} /* Supported Device Scanning routine */ @@ -4817,8 +5452,14 @@ struct pci_dev *dev = NULL; u8 cap_ptr = 0x00; - if ((dev = pci_find_class(PCI_CLASS_BRIDGE_HOST << 8, NULL)) == NULL) - return -ENODEV; + /* + * Some systems have multiple host bridges, so + * we can't just use the first one we find. 
+ */ + do { + if ((dev = pci_find_class(PCI_CLASS_BRIDGE_HOST << 8, dev)) == NULL) + return -ENODEV; + } while (!agp_check_supported_device(dev)); agp_bridge.dev = dev; @@ -5077,17 +5718,17 @@ } if (agp_bridge.needs_scratch_page == TRUE) { - agp_bridge.scratch_page = agp_bridge.agp_alloc_page(); + unsigned long addr; + addr = agp_bridge.agp_alloc_page(); - if (agp_bridge.scratch_page == 0) { + if (addr == 0) { printk(KERN_ERR PFX "unable to get memory for " "scratch page.\n"); return -ENOMEM; } + agp_bridge.scratch_page_real = virt_to_phys((void *) addr); agp_bridge.scratch_page = - virt_to_phys((void *) agp_bridge.scratch_page); - agp_bridge.scratch_page = - agp_bridge.mask_memory(agp_bridge.scratch_page, 0); + agp_bridge.mask_memory(agp_bridge.scratch_page_real, 0); } size_value = agp_bridge.fetch_size(); @@ -5129,9 +5770,8 @@ err_out: if (agp_bridge.needs_scratch_page == TRUE) { - agp_bridge.scratch_page &= ~(0x00000fff); agp_bridge.agp_destroy_page((unsigned long) - phys_to_virt(agp_bridge.scratch_page)); + phys_to_virt(agp_bridge.scratch_page_real)); } if (got_gatt) agp_bridge.free_gatt_table(); @@ -5149,9 +5789,8 @@ vfree(agp_bridge.key_list); if (agp_bridge.needs_scratch_page == TRUE) { - agp_bridge.scratch_page &= ~(0x00000fff); agp_bridge.agp_destroy_page((unsigned long) - phys_to_virt(agp_bridge.scratch_page)); + phys_to_virt(agp_bridge.scratch_page_real)); } } diff -Nru a/mm/memory.c b/mm/memory.c --- a/mm/memory.c Wed Oct 8 09:08:05 2003 +++ b/mm/memory.c Wed Oct 8 09:08:05 2003 @@ -121,7 +121,7 @@ pmd = pmd_offset(dir, 0); pgd_clear(dir); for (j = 0; j < PTRS_PER_PMD ; j++) { - prefetchw(pmd+j+(PREFETCH_STRIDE/16)); + prefetchw(pmd + j + PREFETCH_STRIDE/sizeof(*pmd)); free_one_pmd(pmd+j); } pmd_free(pmd); diff -Nru a/mm/mmap.c b/mm/mmap.c --- a/mm/mmap.c Wed Oct 8 09:08:05 2003 +++ b/mm/mmap.c Wed Oct 8 09:08:05 2003 @@ -900,6 +900,8 @@ break; } no_mmaps: + if (last < first) + return; /* * If the PGD bits are not consecutive in the virtual address, the * old method of shifting the VA >> by PGDIR_SHIFT doesn't work.
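A note on the mm/memory.c change above: PREFETCH_STRIDE is a byte count, so the look-ahead must be scaled by the entry size rather than a hard-coded 16. Worked numbers, assuming a 128-byte PREFETCH_STRIDE and 8-byte pmd entries:

	/*
	 * prefetchw(pmd + j + PREFETCH_STRIDE/sizeof(*pmd)) touches
	 * pmd[j + 16] (128/8), i.e. a full PREFETCH_STRIDE ahead; the old
	 * "/16" form only reached pmd[j + 8] unless entries were 16 bytes.
	 */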